#endif
/* Flush the given writable p.t. page and write-protect it again. */
- void ptwr_flush(const int which)
+ void ptwr_flush(struct domain *d, const int which)
{
- unsigned long sstat, spte, pte, *ptep, l1va;
- l1_pgentry_t *sl1e = NULL, *pl1e, ol1e, nl1e;
+ unsigned long pte, *ptep, l1va;
+ l1_pgentry_t *pl1e, ol1e, nl1e;
l2_pgentry_t *pl2e;
- int i, cpu = smp_processor_id();
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ int i;
unsigned int modified = 0;
- l1va = ptwr_info[cpu].ptinfo[which].l1va;
+ // not supported in combination with various shadow modes!
+ ASSERT( !shadow_mode_enabled(d) );
+
+ l1va = d->arch.ptwr[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
/*
put_page_from_l1e(ol1e, d);
}
unmap_domain_mem(pl1e);
-
+
perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
- ptwr_info[cpu].ptinfo[which].prev_exec_domain = ed;
- ptwr_info[cpu].ptinfo[which].prev_nr_updates = modified;
+ d->arch.ptwr[which].prev_nr_updates = modified;
/*
* STEP 3. Reattach the L1 p.t. page into the current address space.
*/
- if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
+ if ( which == PTWR_PT_ACTIVE )
{
- pl2e = &__linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
- pl2e = &linear_l2_table[d->arch.ptwr[which].l2_idx];
++ pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
}
* STEP 4. Final tidy-up.
*/
- ptwr_info[cpu].ptinfo[which].l1va = 0;
+ d->arch.ptwr[which].l1va = 0;
-
- if ( unlikely(sl1e != NULL) )
- {
- unmap_domain_mem(sl1e);
- put_shadow_status(d);
- }
}
static int ptwr_emulated_update(
};
/* Write page fault handler: check if guest is trying to modify a PTE. */
- int ptwr_do_page_fault(unsigned long addr)
+ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
{
- unsigned long pte, pfn, l2e;
- struct pfn_info *page;
- l2_pgentry_t *pl2e;
- int which, cpu = smp_processor_id();
- u32 l2_idx;
- struct exec_domain *ed = current;
+ unsigned long pte, pfn, l2e;
+ struct pfn_info *page;
+ l2_pgentry_t *pl2e;
+ int which;
+ u32 l2_idx;
- /* Can't use linear_l2_table with external tables. */
- BUG_ON(shadow_mode_external(d));
+ /* Writable pagetables are not used under shadow mode; let the
+  * shadow fault path handle it. Use the explicit 'd' parameter:
+  * 'ed' was removed from this function by this same change. */
+ if ( unlikely(shadow_mode_enabled(d)) )
+ return 0;
/*
* Attempt to read the PTE that maps the VA being accessed. By checking for
* If last batch made no updates then we are probably stuck. Emulate this
* update to ensure we make progress.
*/
- if ( (ptwr_info[cpu].ptinfo[which].prev_exec_domain == ed) &&
- (ptwr_info[cpu].ptinfo[which].prev_nr_updates == 0) )
- {
- /* Force non-emul next time, or we can get stuck emulating forever. */
- ptwr_info[cpu].ptinfo[which].prev_exec_domain = NULL;
+ if ( d->arch.ptwr[which].prev_nr_updates == 0 )
goto emulate;
- }
- ptwr_info[cpu].ptinfo[which].l1va = addr | 1;
- ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
+ d->arch.ptwr[which].l1va = addr | 1;
+ d->arch.ptwr[which].l2_idx = l2_idx;
/* For safety, disconnect the L1 p.t. page from current space. */
- if ( (which == PTWR_PT_ACTIVE) &&
- likely(!shadow_mode_enabled(d)) )
+ if ( which == PTWR_PT_ACTIVE )
{
*pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
local_flush_tlb(); /* XXX Multi-CPU guests? */
if ( unlikely(__put_user(pte, (unsigned long *)
&linear_pg_table[addr>>PAGE_SHIFT])) )
{
- MEM_LOG("ptwr: Could not update pte at %p\n", (unsigned long *)
+ MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
&linear_pg_table[addr>>PAGE_SHIFT]);
/* Toss the writable pagetable state and crash. */
- unmap_domain_mem(ptwr_info[cpu].ptinfo[which].pl1e);
- ptwr_info[cpu].ptinfo[which].l1va = 0;
+ unmap_domain_mem(d->arch.ptwr[which].pl1e);
+ d->arch.ptwr[which].l1va = 0;
domain_crash();
return 0;
}
#define PTWR_CLEANUP_ACTIVE 1
#define PTWR_CLEANUP_INACTIVE 2
- void ptwr_flush(const int);
- int ptwr_do_page_fault(unsigned long);
-
- int new_guest_cr3(unsigned long pfn);
- void propagate_page_fault(unsigned long addr, u16 error_code);
+ int ptwr_init(struct domain *);
+ void ptwr_destroy(struct domain *);
+ void ptwr_flush(struct domain *, const int);
+ int ptwr_do_page_fault(struct domain *, unsigned long);
- #define __cleanup_writable_pagetable(_what) \
- do { \
- int cpu = smp_processor_id(); \
- if ((_what) & PTWR_CLEANUP_ACTIVE) \
- if (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) \
- ptwr_flush(PTWR_PT_ACTIVE); \
- if ((_what) & PTWR_CLEANUP_INACTIVE) \
- if (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va) \
- ptwr_flush(PTWR_PT_INACTIVE); \
- } while ( 0 )
-
- #define cleanup_writable_pagetable(_d) \
- do { \
- if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) \
- __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE | \
- PTWR_CLEANUP_INACTIVE); \
+ #define cleanup_writable_pagetable(_d) \
+ do { \
+ if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) ) { \
+ if ( (_d)->arch.ptwr[PTWR_PT_ACTIVE].l1va ) \
+ ptwr_flush((_d), PTWR_PT_ACTIVE); \
+ if ( (_d)->arch.ptwr[PTWR_PT_INACTIVE].l1va ) \
+ ptwr_flush((_d), PTWR_PT_INACTIVE); \
+ } \
} while ( 0 )
+int audit_adjust_pgtables(struct domain *d, int dir, int noisy);
+
#ifndef NDEBUG
-void audit_domain(struct domain *d);
+
+#define AUDIT_ALREADY_LOCKED ( 1u << 0 )
+#define AUDIT_ERRORS_OK ( 1u << 1 )
+#define AUDIT_QUIET ( 1u << 2 )
+
+void _audit_domain(struct domain *d, int flags);
+#define audit_domain(_d) _audit_domain((_d), 0)
void audit_domains(void);
+
#else
-#define audit_domain(_d) ((void)0)
-#define audit_domains() ((void)0)
+
+#define _audit_domain(_d, _f) ((void)0)
+#define audit_domain(_d) ((void)0)
+#define audit_domains() ((void)0)
+
#endif
+ int new_guest_cr3(unsigned long pfn);
+
+ void propagate_page_fault(unsigned long addr, u16 error_code);
+
/*
* Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
* hold a reference to the page.